Fix up the new non-paging direct_map shadow pagetable mode
author: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Fri, 3 Feb 2006 11:47:38 +0000 (12:47 +0100)
committer: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Fri, 3 Feb 2006 11:47:38 +0000 (12:47 +0100)
for SVM. Rename a vmx_ function in HVM-generic code to
hvm_.

Signed-off-by: Keir Fraser <keir@xensource.com>
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/svm/vmcb.c

index 38772b5d41bc3922c90352b1a702db20b6e7dc1b..1a0ec644c121c42ff1fb46dd83a22caf6204ac5e 100644 (file)
@@ -53,7 +53,7 @@ integer_param("hvm_debug", opt_hvm_debug_level);
 
 struct hvm_function_table hvm_funcs;
 
-static void vmx_zap_mmio_range(
+static void hvm_zap_mmio_range(
     struct domain *d, unsigned long pfn, unsigned long nr_pfn)
 {
     unsigned long i, val = INVALID_MFN;
@@ -95,12 +95,10 @@ static void hvm_map_io_shared_page(struct domain *d)
 
     for ( i = 0; i < e820_map_nr; i++ )
     {
-        if (e820entry[i].type == E820_SHARED_PAGE)
-        {
+        if ( e820entry[i].type == E820_SHARED_PAGE )
             gpfn = (e820entry[i].addr >> PAGE_SHIFT);
-        }
         if ( e820entry[i].type == E820_IO )
-            vmx_zap_mmio_range(
+            hvm_zap_mmio_range(
                 d, 
                 e820entry[i].addr >> PAGE_SHIFT,
                 e820entry[i].size >> PAGE_SHIFT);
index 010243f6c7029a3363835f9c2503fd536f4e9399..4a15f8cd6c3db1091a3610293fefcc8c98157abc 100644 (file)
@@ -833,8 +833,11 @@ static int svm_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
             va, eip, (unsigned long)regs->error_code);
 //#endif
 
-    if (!svm_paging_enabled(v)) 
+    if ( !svm_paging_enabled(v) )
     {
+        if ( shadow_direct_map_fault(va, regs) ) 
+            return 1;
+
         handle_mmio(va, va);
         TRACE_VMEXIT(2,2);
         return 1;
@@ -1437,6 +1440,9 @@ static int svm_set_cr0(unsigned long value)
             if (old_base_mfn)
                 put_page(mfn_to_page(old_base_mfn));
        }
+#endif
+#if CONFIG_PAGING_LEVELS == 2
+        shadow_direct_map_clean(v);
 #endif
         /* Now arch.guest_table points to machine physical. */
         v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
@@ -2272,10 +2278,8 @@ void walk_shadow_and_guest_pt(unsigned long gva)
 
     gpa = gva_to_gpa( gva );
     printk( "gva = %lx, gpa=%lx, gCR3=%x\n", gva, gpa, (u32)vmcb->cr3 );
-    if( !svm_paging_enabled(v) || mmio_space( gpa ) )
-    {
+    if( !svm_paging_enabled(v) || mmio_space(gpa) )
        return;
-    }
 
     /* let's dump the guest and shadow page info */
 
index 028aef0d16a5340cdd1ea6f06179d073c7c49531..74bef8c251280e019adcdcc2c7162fad5ce147f3 100644 (file)
@@ -417,9 +417,8 @@ void svm_do_launch(struct vcpu *v)
     /* current core is the one we will perform the vmrun on */
     v->arch.hvm_svm.core = core;
     clear_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
-    if(!asidpool_assign_next(vmcb, 0, core, core)) {
+    if ( !asidpool_assign_next(vmcb, 0, core, core) )
         BUG();
-    }
 
     if (v->vcpu_id == 0)
         hvm_setup_platform(v->domain);
@@ -446,15 +445,13 @@ void svm_do_launch(struct vcpu *v)
         pt = pagetable_get_paddr(v->domain->arch.phys_table);
         printk("%s: phys_table   = %lx\n", __func__, pt);
     }
-    
-    if (svm_paging_enabled(v))
-    {
+
+    shadow_direct_map_init(v);
+
+    if ( svm_paging_enabled(v) )
         vmcb->cr3 = pagetable_get_paddr(v->arch.guest_table);
-    }
     else
-    {
         vmcb->cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
-    }
 
     if (svm_dbg_on) 
     {